{
extern void subarch_init_memory(struct domain *);
+ unsigned long i, j, pfn, nr_pfns;
+ struct pfn_info *page;
+
memset(percpu_info, 0, sizeof(percpu_info));
    /*
* Initialise our DOMID_IO domain.
- * This domain owns no pages but is considered a special case when
- * mapping I/O pages, as the mappings occur at the priv of the caller.
+ * This domain owns I/O pages that are within the range of the pfn_info
+ * array. Mappings occur at the priv of the caller.
*/
dom_io = alloc_domain_struct();
atomic_set(&dom_io->refcnt, 1);
dom_io->id = DOMID_IO;
+ /* First 1MB of RAM is historically marked as I/O. */
+ for ( i = 0; i < 0x100; i++ )
+ {
+ page = &frame_table[i];
+ page->count_info = PGC_allocated | 1;
+ page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
+ page_set_owner(page, dom_io);
+ }
+
+ /* Any non-RAM areas in the e820 map are considered to be for I/O. */
+ for ( i = 0; i < e820.nr_map; i++ )
+ {
+ if ( e820.map[i].type == E820_RAM )
+ continue;
+ pfn = e820.map[i].addr >> PAGE_SHIFT;
+ nr_pfns = (e820.map[i].size +
+ (e820.map[i].addr & ~PAGE_MASK) +
+ ~PAGE_MASK) >> PAGE_SHIFT;
+ for ( j = 0; j < nr_pfns; j++ )
+ {
+ if ( !pfn_valid(pfn+j) )
+ continue;
+ page = &frame_table[pfn+j];
+ page->count_info = PGC_allocated | 1;
+ page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
+ page_set_owner(page, dom_io);
+ }
+ }
+
subarch_init_memory(dom_xen);
}
{
struct pfn_info *page = &frame_table[page_nr];
- if ( unlikely(!pfn_is_ram(page_nr)) )
- {
- MEM_LOG("Pfn %p is not RAM", page_nr);
- return 0;
- }
-
- if ( unlikely(!get_page(page, d)) )
+ if ( unlikely(!pfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
{
MEM_LOG("Could not get page ref for pfn %p", page_nr);
return 0;
return 0;
}
- if ( unlikely(!pfn_is_ram(mfn)) )
+ if ( unlikely(!pfn_valid(mfn)) ||
+ unlikely(page_get_owner(page) == dom_io) )
{
- /* Revert to caller privileges if FD == DOMID_IO. */
+ /* DOMID_IO reverts to caller for privilege checks. */
if ( d == dom_io )
d = current->domain;
- if ( IS_PRIV(d) )
- return 1;
+ if ( (!IS_PRIV(d)) &&
+ (!IS_CAPABLE_PHYSDEV(d) || !domain_iomem_in_pfn(d, mfn)) )
+ {
+ MEM_LOG("Non-privileged attempt to map I/O space %08lx", mfn);
+ return 0;
+ }
- if ( IS_CAPABLE_PHYSDEV(d) )
- return domain_iomem_in_pfn(d, mfn);
+ /* No reference counting for out-of-range I/O pages. */
+ if ( !pfn_valid(mfn) )
+ return 1;
- MEM_LOG("Non-privileged attempt to map I/O space %p", mfn);
- return 0;
+ d = dom_io;
}
return ((l1v & _PAGE_RW) ?
struct pfn_info *page = &frame_table[pfn];
struct domain *e;
- if ( !(l1v & _PAGE_PRESENT) || !pfn_is_ram(pfn) )
+ if ( !(l1v & _PAGE_PRESENT) || !pfn_valid(pfn) )
return;
e = page_get_owner(page);
gntref = (grant_ref_t)((val & 0xFF00) | ((ptr >> 2) & 0x00FF));
if ( unlikely(IS_XEN_HEAP_FRAME(page)) ||
- unlikely(!pfn_is_ram(pfn)) ||
+ unlikely(!pfn_valid(pfn)) ||
unlikely((e = find_domain_by_id(domid)) == NULL) )
{
MEM_LOG("Bad frame (%p) or bad domid (%d).\n", pfn, domid);
static inline int mfn_is_page_table(unsigned long mfn)
{
- if ( !pfn_is_ram(mfn) )
+ if ( !pfn_valid(mfn) )
return 0;
return frame_table[mfn].count_info & PGC_page_table;
static inline int mfn_out_of_sync(unsigned long mfn)
{
- if ( !pfn_is_ram(mfn) )
+ if ( !pfn_valid(mfn) )
return 0;
return frame_table[mfn].count_info & PGC_out_of_sync;
if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
!(l1_pgentry_val(nl1e) & L1_DISALLOW_MASK) &&
(mfn = l1_pgentry_to_pfn(nl1e)) &&
- pfn_is_ram(mfn) &&
+ pfn_valid(mfn) &&
(owner = page_get_owner(pfn_to_page(l1_pgentry_to_pfn(nl1e)))) &&
(d != owner) )
{
{
u32 x, nx;
- ASSERT(pfn_is_ram(smfn));
+ ASSERT(pfn_valid(smfn));
x = frame_table[smfn].count_info;
nx = x + 1;
{
u32 x, nx;
- ASSERT(pfn_is_ram(smfn));
+ ASSERT(pfn_valid(smfn));
x = frame_table[smfn].count_info;
nx = x - 1;